## [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
## num [1:7] 1 4 7 10 13 16 19
## [1] 1 2 3 4 100 58 5568
## num [1:7] 1 2 3 4 100 ...
## [1] "hey!"
## [1] "jon" "Peter" "Sam"
## chr [1:3] "jon" "Peter" "Sam"
## chr [1:4] "Dec" "May" "Apr" "Dec"
## Factor w/ 3 levels "Apr","Dec","May": 2 3 1 2
## birth_month
## Apr Dec May
## 1 2 1
# The 12 month abbreviations in calendar order, so the factor sorts
# Jan..Dec instead of alphabetically.
month_levels <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
# Re-encode birth_month against the full 12-level set; months that never
# occur in the data are kept as empty levels.
birth_month <- factor(birth_month, levels = month_levels)
str(birth_month)
## Factor w/ 12 levels "Jan","Feb","Mar",..: 12 5 4 12
## [1] Dec May Apr
## Levels: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
## birth_month
## Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
## 0 0 0 1 1 0 0 0 0 0 0 2
You can view help with vignette("dplyr") and vignette("two-table"), or check the online docs. dplyr is part of the tidyverse.
You can use | as a logical "or" inside filter().
##
## Aboriginal Arabic Aramaic Bosnian Cantonese
## 12 2 5 1 1 11
## Chinese Czech Danish Dari Dutch Dzongkha
## 3 1 5 2 4 1
## English Filipino French German Greek Hebrew
## 4704 1 73 19 1 5
## Hindi Hungarian Icelandic Indonesian Italian Japanese
## 28 1 2 2 11 18
## Kannada Kazakh Korean Mandarin Maya Mongolian
## 1 1 8 26 1 1
## None Norwegian Panjabi Persian Polish Portuguese
## 2 4 1 4 4 8
## Romanian Russian Slovenian Spanish Swahili Swedish
## 2 11 1 40 1 5
## Tamil Telugu Thai Urdu Vietnamese Zulu
## 1 1 3 1 1 2
# Keep only films whose language is English or Spanish.
# %in% replaces the repetitive `language == "a" | language == "b"` chain
# (and, unlike `==`, returns FALSE rather than NA for missing languages;
# filter() drops both, so the selected rows are the same).
moviesSub <- movies %>% filter(language %in% c("English", "Spanish"))

# Spanish-language films OR films with a budget above 1e10.
moviesBig_Spanish <- movies %>% filter(language == "Spanish" | budget > 1e10)

# English/Spanish films with budgets over 100 million; successive
# filter() calls are AND-ed together.
movies_eng_sp <- movies %>%
  filter(language %in% c("English", "Spanish")) %>%
  filter(budget > 100000000)
dim(movies_eng_sp)
## [1] 308 28
arrange(): Order the rows.
filter(): Only see certain rows.
select(): Used to pick out certain variables (columns).
## [1] 5043 4
## [1] "movie_title" "director_name" "gross" "budget"
## [1] "movie_title" "director_name"
## [3] "gross" "budget"
## [5] "color" "num_critic_for_reviews"
## [7] "duration" "director_facebook_likes"
## [9] "actor_3_facebook_likes" "actor_2_name"
## [11] "actor_1_facebook_likes" "genres"
## [13] "actor_1_name" "num_voted_users"
## [15] "cast_total_facebook_likes" "actor_3_name"
## [17] "facenumber_in_poster" "plot_keywords"
## [19] "movie_imdb_link" "num_user_for_reviews"
## [21] "language" "country"
## [23] "content_rating" "title_year"
## [25] "actor_2_facebook_likes" "imdb_score"
## [27] "aspect_ratio" "movie_facebook_likes"
starts_with(): Starts with a prefix.
ends_with(): Ends with a suffix.
contains(): Contains a literal string.
matches(): Matches a regular expression.
num_range(): Matches a numerical range like x01, x02, x03.
one_of(): Matches variable names in a character vector.
everything(): Matches all variables.
last_col(): Select last variable, possibly with an offset.
## [1] "movie_title" "director"
## [3] "gross" "budget"
## [5] "color" "num_critic_for_reviews"
## [7] "duration" "director_facebook_likes"
## [9] "actor_3_facebook_likes" "actor_2_name"
## [11] "actor_1_facebook_likes" "genres"
## [13] "actor_1_name" "num_voted_users"
## [15] "cast_total_facebook_likes" "actor_3_name"
## [17] "facenumber_in_poster" "plot_keywords"
## [19] "movie_imdb_link" "num_user_for_reviews"
## [21] "language" "country"
## [23] "content_rating" "title_year"
## [25] "actor_2_facebook_likes" "imdb_score"
## [27] "aspect_ratio" "movie_facebook_likes"
# Total gross per director, in millions (grossM); na.rm drops films with
# missing gross instead of turning the whole sum into NA.
Director_Tot <- movies %>% group_by(director) %>%
summarize(grossTotDir = sum(grossM, na.rm = TRUE))
# NOTE(review): the next line fuses two statements -- head(Director_Tot)
# and the start of the Director_Stats assignment -- an extraction
# artifact; they belong on separate lines.
head(Director_Tot)Director_Stats <- movies %>% group_by(director) %>%
# Per-director film count and min/max/mean/sd of gross (millions).
# NOTE(review): no na.rm here, so any director with a missing grossM
# gets NA for these stats -- confirm that is intended.
summarize(
n = n(),
min = min(grossM),
max = max(grossM),
avg = mean(grossM),
sd = sd(grossM))
# Top 10 directors ranked by their best single-film gross.
# NOTE(review): slice(1:10) and the `movies <-` assignment are fused on
# one line by extraction. Also note grossM is only created by the
# mutate() below, so in a runnable script this mutate must come first.
Director_Stats %>% arrange(desc(max)) %>% slice(1:10)movies <- movies %>%
# genre_main = first genre in the pipe-separated `genres` string;
# grossM/budgetM rescale dollars to millions; profitM = gross - budget;
# ROI = profit per budget dollar.
mutate(genre_main = unlist(map(strsplit(as.character(movies$genres),"\\|"),1)),
grossM = gross / 1000000,
budgetM = budget / 1000000,
profitM = grossM - budgetM,
ROI = profitM/budgetM)
# Convert genre_main to a factor and drop any unused levels.
movies <- movies %>% mutate(genre_main = factor(genre_main) %>% fct_drop())
# Per-director summary: number of films, average gross and average profit
# (both in millions, computed from the grossM/profitM columns added by
# the mutate() step above).
Director_Avg <- movies %>%
  group_by(director) %>%
  summarize(num_movies = n(),
            grossAvgDir = mean(grossM, na.rm = TRUE),
            # Fix: the mutate() step creates `profitM`, not `profit`.
            profitAvgDir = mean(profitM, na.rm = TRUE))

# Top 20 directors by average profit, ignoring one-film directors.
Director_Avg %>% arrange(desc(profitAvgDir)) %>%
  filter(num_movies > 1) %>%
  slice(1:20)

# Total gross (millions) per director.
Director_Tot <- movies %>% group_by(director) %>%
  summarize(grossTotDir = sum(grossM, na.rm = TRUE))
# Mean, standard deviation, and standard error of ROI per main genre,
# plus film counts.
genre_collapse <- movies %>%
  group_by(genre_main) %>%
  summarize(Avg_ROI_genre = mean(ROI, na.rm = TRUE),
            SD_ROI_genre  = sd(ROI, na.rm = TRUE),
            # summarize() lets later columns reuse earlier ones:
            SE_ROI_genre  = SD_ROI_genre / sqrt(n()),
            num_films     = n())

# The same ROI summary, grouped by the lead actor instead of genre.
actor_sum <- movies %>%
  group_by(actor_1_name) %>%
  summarize(Avg_ROI_actor = mean(ROI, na.rm = TRUE),
            SD_ROI_actor  = sd(ROI, na.rm = TRUE),
            SE_ROI_actor  = SD_ROI_actor / sqrt(n()),
            num_films     = n())

# The five best average-ROI actors among those with at least three films.
actor_sum %>% filter(num_films > 2) %>% top_n(5, wt = Avg_ROI_actor)
## [1] 0.2909999 0.2755208 2.0667876 12.3284782 0.4697230 0.2315017
## [7] 1.6732844 0.2963527 7.1413626 1.1121905 NA NA
## [13] 40.1424230 11.4088478 0.4763281 1.0834822 0.6181190 1.4418917
## [19] 0.4705548
You can view help with vignette("magrittr"), or check the online docs. magrittr is part of the tidyverse.
We start with a value, here mtcars (a data.frame). Based on this, we first extract a subset, then we aggregate the information based on the number of cylinders, and then we transform the dataset by adding a variable for kilometers per liter as supplement to miles per gallon. Finally we print the result before assigning it. Note how the code is arranged in the logical order of how you think about the task: data->transform->aggregate, which is also the same order as the code will execute. It’s like a recipe – easy to read, easy to follow!
# Fix: the package is spelled "magrittr" -- the original `library(magritter)`
# would fail with "there is no package called 'magritter'".
library(magrittr)

# Keep cars with more than 100 hp, average every column within each
# cylinder count (the FUN is built on the fly with the `.` placeholder),
# then add km/l alongside mpg before printing and assigning the result.
car_data <-
  mtcars %>%
  subset(hp > 100) %>%
  aggregate(. ~ cyl, data = ., FUN = . %>% mean %>% round(2)) %>%
  transform(kpl = mpg %>% multiply_by(0.4251)) %>%
  print
## cyl mpg disp hp drat wt qsec vs am gear carb kpl
## 1 4 25.90 108.05 111.00 3.94 2.15 17.75 1.00 1.00 4.50 2.00 11.010090
## 2 6 19.74 183.31 122.29 3.59 3.12 17.98 0.57 0.43 3.86 3.43 8.391474
## 3 8 15.10 353.10 209.21 3.23 4.00 16.77 0.00 0.14 3.29 3.50 6.419010
Note also how “building” a function on the fly for use in aggregate is very simple in magrittr: rather than an actual value as left-hand side in pipeline, just use the placeholder. This is also very useful in R’s *apply family of functions.
The combined example shows a few neat features of the pipe-like operator %>% (which resembles, but is not, a Unix pipe):
%>% may be used in a nested fashion, e.g. it may appear in expressions within arguments. This is used in the mpg to kpl conversion.One feature, which was not utilized above is piping into anonymous functions, or lambdas. This is possible using standard function definitions, e.g.
However, magrittr also allows a short-hand notation:
The “tee” operator, %T>% works like %>%, except it returns the left-hand side value, and not the result of the right-hand side operation. This is useful when a step in a pipeline is used for its side-effect (printing, plotting, logging, etc.). As an example:
## [1] -20.685227 5.665451
The “exposition” pipe operator, %$% exposes the names within the left-hand side object to the right-hand side expression. Essentially, it is a short-hand for using the with functions (and the same left-hand side objects are accepted). This operator is handy when functions do not themselves have a data argument, as for example lm and aggregate do. Here are a few examples as illustration:
## [1] 0.3361992
Finally, the compound assignment pipe operator %<>% can be used as the first pipe in a chain. The effect will be that the result of the pipeline is assigned to the left-hand side object, rather than returning the result as usual. It is essentially shorthand notation for expressions like foo <- foo %>% bar %>% baz, which boils down to foo %<>% bar %>% baz. Another example is
The %<>% can be used whenever expr <- … makes sense, e.g.
x %<>% foo %>% barx[1:10] %<>% foo %>% barx$baz %<>% foo %>% barggplot2 is a system for declaratively creating graphics, based on The Grammar of Graphics. You provide the data, tell ggplot2 how to map variables to aesthetics, what graphical primitives to use, and it takes care of the details.
It’s hard to succinctly describe how ggplot2 works because it embodies a deep philosophy of visualisation. However, in most cases you start with ggplot(), supply a dataset and aesthetic mapping (with aes()). You then add on layers (like geom_point() or geom_histogram()), scales (like scale_colour_brewer()), faceting specifications (like facet_wrap()) and coordinate systems (like coord_flip()).
geom_bar makes the height of the bar proportional to the number of cases in each group (or if the weight aesthetic is supplied, the sum of the weights). If you want the heights of the bars to represent values in the data, use geom_col() instead. geom_bar() uses stat_count() by default: it counts the number of cases at each x position. geom_col() uses stat_identity(): it leaves the data as is.
# Bar chart of delinquency totals by age. stat = "identity" makes bar
# height equal the y value rather than a count of rows.
ggplot(gmsc_train,
aes(y = SeriousDlqin2yrs, x = age)) +
geom_bar(stat = "identity", fill = "red") +
ggtitle("Serious Delinquencies in 2 years vs By Age") +
# NOTE(review): the next line fuses the labs() call with the start of a
# second ggplot() example -- an extraction artifact; they belong on
# separate lines.
labs(x = "Age", y = "Serious Delinquencies in 2 years") ggplot(gmsc_train,
aes(y = SeriousDlqin2yrs,
x = NumberRealEstateLoansOrLines)) +
# geom_col() is shorthand for geom_bar(stat = "identity").
geom_col(fill = "red") +
ggtitle("Serious Delinquencies in 2 years vs Number of Loans or Lines") +
# NOTE(review): prose describing geom_jitter is fused onto the end of
# the next line by extraction.
labs(x = "Number of Real Estate Loans Or Lines", y = "Serious Delinquencies in 2 years")The jitter geom is a convenient shortcut for geom_point(position = "jitter"). It adds a small amount of random variation to the location of each point, and is a useful way of handling overplotting caused by discreteness in smaller datasets.
# Jittered scatter of delinquencies vs income; alpha = 1/10 fades
# overlapping points, and xlim() trims incomes above 100,000 (hence the
# "Removed ... rows" warning below).
ggplot(gmsc_train, aes( x = MonthlyIncome,
y = SeriousDlqin2yrs)) +
geom_jitter(alpha = 1/10, color = "hotpink") +
xlim(0, 100000) +
ggtitle("Serious Delinquencies in 2 years vs Monthly Income") +
labs(x = "Monthly Income", y = "Serious Delinquencies in 2 years")## Warning: Removed 18391 rows containing missing values (geom_point).
Visualise the distribution of a single continuous variable by dividing the x axis into bins and counting the number of observations in each bin. Histograms (geom_histogram()) display the counts with bars; frequency polygons (geom_freqpoly()) display the counts with lines. Frequency polygons are more suitable when you want to compare the distribution across the levels of a categorical variable.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# One boxplot of highway mileage per vehicle class; reorder() sorts the
# classes left-to-right by hwy. The fill legend duplicates the x axis,
# so it is suppressed.
ggplot(mpg, aes(x = reorder(class, hwy), y = hwy, fill = class)) +
  geom_boxplot() +
  xlab("class") +
  theme(legend.position = "none")
TODO : Write about lines
TODO : Write about Error Bars
TODO: Write about themes
You can use str to get info about what is contained in a model ie: str(mod1) ## Setup Test/Train
# Randomly assign 75% of the rows to the training set; the remainder
# becomes the test set. Call set.seed() first if the split must be
# reproducible. seq_len(nrow(movies)) is the safe form of
# 1:nrow(movies) (it gives an empty index for a zero-row data frame
# instead of c(1, 0)).
train_idx <- sample(seq_len(nrow(movies)), size = floor(0.75 * nrow(movies)))
movies_train <- movies %>% slice(train_idx)
movies_test <- movies %>% slice(-train_idx)
Where 0.75 is the percentage (75%) of the data to put in the Training set.
Linear regression is a linear approach to modeling the relationship between a scalar response (or dependent variable) and one or more explanatory variables (or independent variables). The case of one explanatory variable is called simple linear regression. Generate a linear model with lm(); the desired formula is written with the dependent variable, followed by ~, and then a list of the independent variables. You can use . for all variables, or something like y ~ . - director to use all except one. You can get the coefficients like this: mod1$coefficients[1]
##
## Call:
## lm(formula = gross ~ budget + duration, data = movies_train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -404027478 -23904513 -9478641 11699873 491872401
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -1.365e+07 5.087e+06 -2.683 0.00733 **
## budget 1.009e+00 2.445e-02 41.270 < 2e-16 ***
## duration 2.424e+05 4.710e+04 5.147 2.82e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 55500000 on 2906 degrees of freedom
## (495 observations deleted due to missingness)
## Multiple R-squared: 0.4102, Adjusted R-squared: 0.4098
## F-statistic: 1011 on 2 and 2906 DF, p-value: < 2.2e-16
Logistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). Use function glm() notice the family = binomial
# ISLR supplies the Default credit-card data set.
library(ISLR)
data(Default)
# Raise the penalty against scientific notation so small coefficients
# print in fixed notation.
options(scipen=9)
# Logistic regression of default (Yes/No) on account balance;
# family = binomial is what makes glm() fit a logit model.
logitMod1 <- glm(factor(default) ~ balance,
family = binomial,
data = Default)
summary(logitMod1)##
## Call:
## glm(formula = factor(default) ~ balance, family = binomial, data = Default)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.2697 -0.1465 -0.0589 -0.0221 3.7589
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -10.6513306 0.3611574 -29.49 <2e-16 ***
## balance 0.0054989 0.0002204 24.95 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 2920.6 on 9999 degrees of freedom
## Residual deviance: 1596.5 on 9998 degrees of freedom
## AIC: 1600.5
##
## Number of Fisher Scoring iterations: 8
## (Intercept) balance
## -10.6513 0.0055
## 1 2 3 4 5
## 0.0013056797 0.0021125949 0.0085947405 0.0004344368 0.0017769574
## 6
## 0.0037041528
## Loading required package: lattice
##
## Attaching package: 'caret'
## The following object is masked from 'package:purrr':
##
## lift
# caret::confusionMatrix of predicted vs observed default, thresholding
# the model-1 scores at 0.5. Both factors are given the same explicit
# level order ("Yes" first) so "Yes" is treated as the positive class.
confusionMatrix(factor(ifelse(preds_DF$scores_mod1 > 0.5,"Yes","No"),
levels = c("Yes", "No")),
factor(preds_DF$default,
levels = c("Yes","No") ) )## Confusion Matrix and Statistics
##
## Reference
## Prediction Yes No
## Yes 100 42
## No 233 9625
##
## Accuracy : 0.9725
## 95% CI : (0.9691, 0.9756)
## No Information Rate : 0.9667
## P-Value [Acc > NIR] : 0.0004973
##
## Kappa : 0.4093
##
## Mcnemar's Test P-Value : < 2.2e-16
##
## Sensitivity : 0.3003
## Specificity : 0.9957
## Pos Pred Value : 0.7042
## Neg Pred Value : 0.9764
## Prevalence : 0.0333
## Detection Rate : 0.0100
## Detection Prevalence : 0.0142
## Balanced Accuracy : 0.6480
##
## 'Positive' Class : Yes
##
# Stack the two models' scores into one long data frame so both ROC
# curves can be drawn on the same plot, keyed by the `models` label.
TrainDF <- data.frame(default = c(Default$default, Default$default),
scores = c(preds_DF$scores_mod1,
preds_DF$scores_mod2),
# NOTE(review): the labels claim "X = Student" and "X = Student +
# Balance + Income" -- confirm these match the predictors that
# scores_mod1 / scores_mod2 were actually fit on (mod1 above used
# balance only).
models = c(rep("X = Student",length(preds_DF$scores_mod1)),
rep("X = Student + Balance + Income",
length(preds_DF$scores_mod2))))
library(ggplot2)
library('plotROC')
# plotROC aesthetics: m = model score, d = observed outcome;
# cutoffs.at labels the chosen probability thresholds along each curve.
TrainROC <- ggplot(TrainDF, aes(m = scores, d = default, color = models)) +
geom_roc(show.legend = TRUE, labelsize = 3.5, cutoffs.at = c(.99,.9,.7,.5,.3,.1,0))
# Standard ROC styling, blue axis text, legend pinned to the
# bottom-right corner of the panel.
TrainROC <- TrainROC + style_roc(theme = theme_grey) +
theme(axis.text = element_text(colour = "blue")) +
theme(legend.justification = c(1, 0),
legend.position = c(1, 0),
legend.box.margin=margin(c(50,50,50,50)))
# The warning below is expected: `default` is a Yes/No factor rather
# than 0/1, so plotROC recodes it.
plot(TrainROC)## Warning in verify_d(data$d): D not labeled 0/1, assuming 1 = 0 and 2 = 1!
Load library(plyr) before library(tidyverse), so that dplyr's functions (loaded later) mask plyr's rather than the other way around.
# Calibration plot: bin the model-2 scores and compare each bin's mean
# predicted probability with its observed default rate.
scores3DF <- data.frame(default = ifelse(Default$default == "Yes",1,0),
scores = preds_DF$scores_mod2)
# plyr::ddply splits the rows into the fixed score bins below and takes
# the column-wise mean within each bin, giving the average score and
# the observed default rate per bin (plus the bin-label column).
calData <- ddply(scores3DF, .(cut(scores3DF$scores, c(0,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1))), colwise(mean))
# Midpoints of the bins above, used as the x coordinate.
calData$midpoint <- c(0.025,.1,.2,.3,.4,.5,.6,.7,.8,.9,.975)
# NOTE(review): at this point calData appears to have 4 columns (bin
# label, default, scores, midpoint) but only 3 names are assigned --
# verify this runs; the rename may need to happen before the midpoint
# column is added.
colnames(calData) <- c("preds", "true", "midpoint")
# Points should fall near the red y = x line if the model is calibrated.
calPlot <- ggplot(calData, aes(x = midpoint, y = true)) + geom_point() + ylim(0,1) +
geom_abline(intercept = 0, slope = 1, color = "red") +
xlab("Prediction midpoint") + ylab("Observed event percentage"
)
plot(calPlot)R uses factors to handle categorical variables, variables that have a fixed and known set of possible values. Factors are also helpful for reordering character vectors to improve display. The goal of the forcats package is to provide a suite of tools that solve common problems with factors, including changing the order of levels or the values. Some examples include:
fct_reorder(): Reordering a factor by another variable.fct_infreq(): Reordering a factor by the frequency of values.fct_relevel(): Changing the order of a factor by hand.fct_lump(): Collapsing the least/most frequent values of a factor into “other”.